Timing of divergence algorithms

Created by Mauro Alberti

Last run: 2019-06-16


In [1]:
import numpy as np

In [2]:
def divergence(F):
    """Sum the gradients of `F` along every array axis (reduce-based).

    NOTE(review): despite the name, for an (ny, nx, 2) array this adds the
    np.gradient results over all three axes, including the component axis —
    confirm this is the intended notion of divergence. Numerically it matches
    `divergence_a`/`divergence_b` below.
    """
    # Local import: the notebook-level `from functools import reduce` only
    # appears in a later cell (In [7]), so make this cell self-contained and
    # safe to call in any execution order.
    from functools import reduce
    return reduce(np.add, np.gradient(F))

In [3]:
F = np.random.rand(100,100, 2)

In [4]:
F


Out[4]:
array([[[2.69541387e-01, 8.10113826e-01],
        [7.95465770e-01, 6.25728396e-01],
        [5.41488845e-01, 3.69856759e-01],
        ...,
        [3.67561919e-01, 2.10709632e-02],
        [6.64973970e-01, 7.90978987e-01],
        [3.12721952e-02, 1.44094003e-01]],

       [[2.83623106e-01, 5.51848951e-01],
        [1.07060241e-01, 9.62973326e-01],
        [5.74109066e-01, 9.90534360e-01],
        ...,
        [2.40565466e-01, 9.56963306e-01],
        [3.11033351e-01, 3.96612658e-01],
        [1.60559485e-01, 4.82408966e-02]],

       [[9.12693959e-01, 9.42098363e-01],
        [6.71576911e-03, 8.09364637e-01],
        [3.26599107e-01, 8.82337464e-01],
        ...,
        [4.54795744e-01, 2.59673358e-01],
        [3.25704001e-01, 7.56919240e-01],
        [3.83572203e-02, 3.84145139e-01]],

       ...,

       [[5.55437805e-01, 4.00813040e-01],
        [9.22288494e-01, 8.82000365e-01],
        [8.05756422e-01, 3.81736278e-01],
        ...,
        [3.71397912e-01, 4.06594489e-01],
        [2.83343034e-01, 1.89966647e-01],
        [3.45453443e-01, 2.54570555e-01]],

       [[2.38018545e-01, 3.76268537e-01],
        [4.65982735e-01, 4.08912319e-01],
        [4.29906959e-01, 5.75999876e-02],
        ...,
        [3.34431749e-01, 1.96492791e-01],
        [8.63118584e-01, 1.07177788e-01],
        [1.85761123e-01, 7.15785200e-01]],

       [[9.46110161e-01, 1.41352359e-01],
        [3.88569751e-01, 6.58143221e-01],
        [9.02726474e-03, 3.31361285e-01],
        ...,
        [9.55881215e-01, 5.38569342e-01],
        [8.57063761e-04, 8.84870885e-01],
        [5.92200968e-01, 1.68400086e-01]]])

In [5]:
F.ndim


Out[5]:
3

In [6]:
F.shape


Out[6]:
(100, 100, 2)

In [7]:
from functools import reduce

In [8]:
def divergence_a(field):
    """Sum the per-axis gradients of `field` (np.sum-based variant).

    np.gradient returns one array per axis of `field`; summing that list
    over its leading axis yields an array with `field`'s shape.
    """
    per_axis_gradients = np.gradient(field)
    return np.sum(per_axis_gradients, axis=0)

In [9]:
timeit divergence_a(F)


254 µs ± 15.9 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)

In [10]:
# Store the result of divergence_a — the implementation timed in the previous
# cell. (The original called `divergence`, which is numerically equivalent but
# was not the function being benchmarked here.)
d_a = divergence_a(F)

In [11]:
d_a.shape


Out[11]:
(100, 100, 2)

In [12]:
def divergence_b(F):
    """Sum the per-axis gradients of `F` (functools.reduce-based variant).

    Folds the gradient list together with pairwise np.add; kept distinct
    from `divergence_a` so the two strategies can be timed separately.
    """
    gradient_arrays = np.gradient(F)
    return reduce(np.add, gradient_arrays)

In [13]:
timeit divergence_b(F)


210 µs ± 11.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)

In [14]:
d_b = divergence_b(F)

In [15]:
d_b.shape


Out[15]:
(100, 100, 2)

In [16]:
np.allclose(d_a, d_b)


Out[16]:
True

In [17]:
F = np.random.rand(100,100, 2)

In [18]:
def my_divergence(F):
    """Divergence-like quantity of the 2-D vector field packed in `F`.

    `F[:, :, 0]` holds the x-component and `F[:, :, 1]` the y-component;
    returns d(Fx)/dx - d(Fy)/dy.

    NOTE(review): the y-derivative enters with a minus sign — presumably a
    grid/image axis convention; confirm against the intended coordinates.
    Both axis gradients of each component are computed and one is discarded,
    which is the overhead `my_divergence_split` avoids.
    """
    grads_x_component = np.gradient(F[:, :, 0])  # [d/d(axis0), d/d(axis1)]
    grads_y_component = np.gradient(F[:, :, 1])
    return grads_x_component[1] - grads_y_component[0]

In [19]:
timeit my_divergence(F)


189 µs ± 2.64 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)

In [20]:
d_m = my_divergence(F)

In [21]:
F1, F2 = F[:, :, 0], F[:, :, 1]

In [22]:
def my_divergence_split(F1, F2):
    """Same quantity as `my_divergence`, but on pre-split component arrays.

    Computes only the needed axis derivative of each component
    (axis=1 for `F1`, axis=0 for `F2`) and returns d(F1)/dx - d(F2)/dy.
    """
    return np.gradient(F1, axis=1) - np.gradient(F2, axis=0)

In [23]:
timeit my_divergence_split(F1, F2)


107 µs ± 859 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)

In [24]:
d_s = my_divergence_split(F1, F2)

In [25]:
np.allclose(d_m, d_s)


Out[25]:
True